}
/* calls in xen/common code that are unused on ia64 */
-void synchronise_execution_state(unsigned long cpu_mask) { }
int grant_table_create(struct domain *d) { return 0; }
void grant_table_destroy(struct domain *d)
return stu.rax;
}
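+/* No-op on x86/64: keeps the common context-switch path below free of #ifdefs. */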
+#define switch_kernel_stack(_n,_c) ((void)0)
+
#elif defined(__i386__)
#define load_segments(_p, _n) ((void)0)
#define save_segments(_p) ((void)0)
#define clear_segments() ((void)0)
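+/* Point this CPU's TSS at the next domain's ring-1 kernel stack. */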
+static inline void switch_kernel_stack(struct exec_domain *n, unsigned int cpu)
+{
+ struct tss_struct *tss = &init_tss[cpu];
+ tss->esp1 = n->arch.kernel_sp;
+ tss->ss1 = n->arch.kernel_ss;
+}
+
#endif
#define loaddebug(_ed,_reg) \
save_segments(p);
}
- memcpy(stack_ec,
- &n->arch.user_ctxt,
- sizeof(*stack_ec));
-
- /* Maybe switch the debug registers. */
- if ( unlikely(n->arch.debugreg[7]) )
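+    /* The idle domain carries no guest register or debug state to restore. */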
+ if ( !is_idle_task(n->domain) )
{
- loaddebug(&n->arch, 0);
- loaddebug(&n->arch, 1);
- loaddebug(&n->arch, 2);
- loaddebug(&n->arch, 3);
- /* no 4 and 5 */
- loaddebug(&n->arch, 6);
- loaddebug(&n->arch, 7);
- }
+ memcpy(stack_ec,
+ &n->arch.user_ctxt,
+ sizeof(*stack_ec));
- if ( !VMX_DOMAIN(n) )
- {
- SET_FAST_TRAP(&n->arch);
+ /* Maybe switch the debug registers. */
+ if ( unlikely(n->arch.debugreg[7]) )
+ {
+ loaddebug(&n->arch, 0);
+ loaddebug(&n->arch, 1);
+ loaddebug(&n->arch, 2);
+ loaddebug(&n->arch, 3);
+ /* no 4 and 5 */
+ loaddebug(&n->arch, 6);
+ loaddebug(&n->arch, 7);
+ }
-#ifdef __i386__
+ if ( !VMX_DOMAIN(n) )
{
- /* Switch the kernel ring-1 stack. */
- struct tss_struct *tss = &init_tss[cpu];
- tss->esp1 = n->arch.kernel_sp;
- tss->ss1 = n->arch.kernel_ss;
+ SET_FAST_TRAP(&n->arch);
+ switch_kernel_stack(n, cpu);
}
-#endif
}
set_bit(cpu, &n->domain->cpuset);
write_ptbase(n);
- clear_bit(cpu, &p->domain->cpuset);
-
__asm__ __volatile__ ( "lgdt %0" : "=m" (*n->arch.gdt) );
+ clear_bit(cpu, &p->domain->cpuset);
percpu_ctxt[cpu].curr_ed = n;
}
set_current(next);
- if ( ((realprev = percpu_ctxt[smp_processor_id()]. curr_ed) == next) ||
+ if ( ((realprev = percpu_ctxt[smp_processor_id()].curr_ed) == next) ||
is_idle_task(next->domain) )
{
local_irq_enable();
BUG();
}
-static void __synchronise_lazy_execstate(void *unused)
-{
- if ( percpu_ctxt[smp_processor_id()].curr_ed != current )
- {
- __context_switch();
- load_LDT(current);
- clear_segments();
- }
-}
-void synchronise_lazy_execstate(unsigned long cpuset)
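+/* Load current's state if this CPU is lazily holding another domain's; returns non-zero if it did. */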
+int __sync_lazy_execstate(void)
{
- smp_subset_call_function(__synchronise_lazy_execstate, NULL, 1, cpuset);
+ if ( percpu_ctxt[smp_processor_id()].curr_ed == current )
+ return 0;
+ __context_switch();
+ load_LDT(current);
+ clear_segments();
+ return 1;
}
unsigned long __hypercall_create_continuation(
if ( d != current->domain )
domain_pause(d);
- synchronise_lazy_execstate(~0UL);
+
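+    /* Make sure no CPU is still lazily holding this domain's execution state. */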
+ sync_lazy_execstate_all();
printk("pt base=%lx sh_info=%x\n",
pagetable_val(d->exec_domain[0]->arch.guest_table)>>PAGE_SHIFT,
{
ack_APIC_irq();
perfc_incrc(ipis);
- if ( flush_va == FLUSHVA_ALL )
- local_flush_tlb();
- else
- local_flush_tlb_one(flush_va);
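+    /* If we synced lazy exec-state, write_ptbase() reloaded CR3 and flushed the TLB for us. */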
+ if ( !__sync_lazy_execstate() )
+ {
+ if ( flush_va == FLUSHVA_ALL )
+ local_flush_tlb();
+ else
+ local_flush_tlb_one(flush_va);
+ }
clear_bit(smp_processor_id(), &flush_cpumask);
}
return 0;
}
-/* Run a function on a subset of CPUs (may include local CPU). */
-int smp_subset_call_function(
- void (*func) (void *info), void *info, int wait, unsigned long cpuset)
-{
- struct call_data_struct data;
-
- ASSERT(local_irq_is_enabled());
-
- if ( cpuset & (1UL << smp_processor_id()) )
- {
- local_irq_disable();
- (*func)(info);
- local_irq_enable();
- }
-
- cpuset &= ((1UL << smp_num_cpus) - 1) & ~(1UL << smp_processor_id());
- if ( cpuset == 0 )
- return 0;
-
- data.func = func;
- data.info = info;
- data.started = data.finished = 0;
- data.wait = wait;
-
- spin_lock(&call_lock);
-
- call_data = &data;
- wmb();
-
- send_IPI_mask(cpuset, CALL_FUNCTION_VECTOR);
-
- while ( (wait ? data.finished : data.started) != cpuset )
- cpu_relax();
-
- spin_unlock(&call_lock);
-
- return 0;
-}
-
static void stop_this_cpu (void *dummy)
{
clear_bit(smp_processor_id(), &cpu_online_map);
* Force loading of currently-executing domain state on the specified set
* of CPUs. This is used to counteract lazy state switching where required.
*/
-void synchronise_lazy_execstate(unsigned long cpuset);
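+/* Piggy-backed on the TLB-flush IPI: its handler calls __sync_lazy_execstate() before flushing. */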
+#define sync_lazy_execstate_cpuset(_cpuset) flush_tlb_mask(_cpuset)
+#define sync_lazy_execstate_all() flush_tlb_all()
+extern int __sync_lazy_execstate(void);
extern void context_switch(
struct exec_domain *prev,
ASSERT(ed != current);
atomic_inc(&ed->pausecnt);
domain_sleep(ed);
- synchronise_lazy_execstate(ed->domain->cpuset & (1UL << ed->processor));
+ sync_lazy_execstate_cpuset(ed->domain->cpuset & (1UL << ed->processor));
}
static inline void domain_pause(struct domain *d)
domain_sleep(ed);
}
- synchronise_lazy_execstate(d->cpuset);
+ sync_lazy_execstate_cpuset(d->cpuset);
}
static inline void exec_domain_unpause(struct exec_domain *ed)
domain_sleep(ed);
}
- synchronise_lazy_execstate(d->cpuset);
+ sync_lazy_execstate_cpuset(d->cpuset);
}
static inline void domain_unpause_by_systemcontroller(struct domain *d)
*/
extern int smp_call_function(
void (*func) (void *info), void *info, int retry, int wait);
-extern int smp_subset_call_function(
- void (*func) (void *info), void *info, int wait, unsigned long cpuset);
/*
* True once the per process idle is forked
#define cpu_logical_map(cpu) 0
#define cpu_number_map(cpu) 0
#define smp_call_function(func,info,retry,wait) 0
-#define smp_subset_call_function(f,i,w,c) ({ if ( (c&1) ) (*f)(i); 0; })
#define cpu_online_map 1
#endif